type runtime.m

69 uses

	runtime (current package)
		cgocall.go#L231: func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
		coro.go#L32: 	mp        *m
		debug.go#L161: 	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		debug.go#L171: 	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		lock_futex.go#L136: func semacreate(mp *m) {}
		lock_futex.go#L158: func semawakeup(mp *m) {
		lock_spinbit.go#L417: 	var committed *m // If we choose an M within the stack, we've made a promise to wake it
		malloc.go#L1828: func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
		mcache.go#L128: func getMCache(mp *m) *mcache {
		mheap.go#L2521: func gcParkStrongFromWeak() *m {
		mprof.go#L437: func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
		mprof.go#L1226: 	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
		os_linux.go#L173: func newosproc(mp *m) {
		os_linux.go#L390: func mpreinit(mp *m) {
		os_linux.go#L422: func mdestroy(mp *m) {
		os_linux.go#L570: func signalM(mp *m, sig int) {
		os_linux.go#L581: func validSIGPROF(mp *m, c *sigctxt) bool {
		preempt.go#L119: 	var asyncM *m
		preempt.go#L287: func canPreemptM(mp *m) bool {
		preempt_nonwindows.go#L10: func osPreemptExtEnter(mp *m) {}
		preempt_nonwindows.go#L13: func osPreemptExtExit(mp *m) {}
		proc.go#L118: 	m0           m
		proc.go#L997: func mcommoninit(mp *m, id int64) {
		proc.go#L1040: func mProfStackInit(mp *m) {
		proc.go#L1070: func (mp *m) becomeSpinning() {
		proc.go#L1083: func (mp *m) snapshotAllp() []*p {
		proc.go#L1094: func (mp *m) clearAllpSnapshot() {
		proc.go#L1098: func (mp *m) hasCgoOnStack() bool {
		proc.go#L2284: func allocm(pp *p, fn func(), id int64) *m {
		proc.go#L2301: 		var newList *m
		proc.go#L2748: func lockextra(nilokay bool) *m {
		proc.go#L2770: 			return (*m)(unsafe.Pointer(old))
		proc.go#L2778: func unlockextra(mp *m, delta int32) {
		proc.go#L2790: func getExtraM() (mp *m, last bool) {
		proc.go#L2801: func putExtraM(mp *m) {
		proc.go#L2809: func addExtraM(mp *m) {
		proc.go#L2911: func newm1(mp *m) {
		proc.go#L5592: func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
		proc.go#L5696: 		var mp *m
		proc.go#L6825: func mput(mp *m) {
		proc.go#L6839: func mget() *m {
		rand.go#L188: func mrandinit(mp *m) {
		runtime1.go#L628: func acquirem() *m {
		runtime1.go#L635: func releasem(mp *m) {
		runtime2.go#L283: func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
		runtime2.go#L286: func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
		runtime2.go#L293: func setMNoWB(mp **m, new *m) {
		runtime2.go#L408: 	m         *m      // current m; offset known to arm liblink
		runtime2.go#L532: type m struct {
		runtime2.go#L574: 	alllink         *m // on allm
		runtime2.go#L593: 	freelink    *m // on sched.freem
		runtime2.go#L633: 	m
		runtime2.go#L639: 	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
		runtime2.go#L826: 	freem *m
		runtime2.go#L1212: 	allm          *m
		signal_unix.go#L368: func preemptM(mp *m) {
		signal_unix.go#L555: func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
		signal_unix.go#L595: func adjustSignalStack2(sig uint32, sp uintptr, mp *m, ssDisable bool) {
		signal_unix.go#L839: func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g {
		signal_unix.go#L1112: func sigNotOnStack(sig uint32, sp uintptr, mp *m) {
		tls_stub.go#L10: func osSetupTLS(mp *m) {}
		tracecpu.go#L213: func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
		traceruntime.go#L30: 	link          *m                                   // Snapshot of alllink or freelink.
		traceruntime.go#L171: 	mp  *m
		traceruntime.go#L684: func traceThreadDestroy(mp *m) {
		tracestack.go#L36: 	var mp *m
		vgetrandom_linux.go#L80: func vgetrandomDestroy(mp *m) {
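
Many of the uses above take an *m obtained via the acquirem/releasem pair listed at runtime1.go#L628 and runtime1.go#L635. A minimal sketch of that convention, assuming only the documented behavior that acquirem returns the current g's m with m.locks incremented (blocking preemption) and releasem decrements it; this snippet is illustrative and not taken from any file in the listing:

	// Hypothetical runtime-internal code following the acquirem/releasem convention.
	mp := acquirem() // current m, m.locks++: goroutine cannot be preempted or migrated off this M
	// ... work that must stay on this M (e.g. touching per-M state such as mp.mcache) ...
	releasem(mp)     // m.locks--: preemption is allowed again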